Another potentially interesting question we can try to answer is how much face representation we see across the task. In order to do so, we’ve trained a linear SVM classifier within subjects on the data from the smoothed FFA localizer to classify signal into faces, objects and scrambles. We can then apply that classifier to various facets of our data. For each of these analyses, we will look at the probability of the classifier predicting a face. If the classifier does indeed predict a face, we score that TR with a “1”, otherwise, it gets a “0”, meaning chance becomes 1/3 = .33.
First, we will apply it to each TR of individual trials. Trials are split into 4 bins based on accuracy and load, and averaged over those measures to create a single time course for each category. The classifier was also applied to each TR of a “template” for each condition. In this analysis, all trials in a given condition were averaged to create a prototypical example for the category. The classifier was then applied to those averages.
We can then look at the probability of classification across subjects. First, we look at it across all subjects, but then we can look at it across our working memory capacity groups.
Finally, we will relate these neural measures to behavior, both averaged over time and for each TR.
library(reshape2)
library(tidyverse)
## ── Attaching packages ──────────────────────────────────────────────────────────────────────────────────────── tidyverse 1.3.0 ──
## ✓ ggplot2 3.3.2 ✓ purrr 0.3.4
## ✓ tibble 3.0.3 ✓ dplyr 1.0.1
## ✓ tidyr 1.1.1 ✓ stringr 1.4.0
## ✓ readr 1.3.1 ✓ forcats 0.5.0
## ── Conflicts ─────────────────────────────────────────────────────────────────────────────────────────── tidyverse_conflicts() ──
## x dplyr::filter() masks stats::filter()
## x dplyr::lag() masks stats::lag()
library(patchwork)
# Behavioral data and pre-computed working-memory capacity group assignments
load('data/behav.RData')
load('data/split_groups_info.RData')
load('data/DFR_split_groups_info.RData')
# Helper functions for grouping subjects and reshaping data for plotting
source("helper_fxns/split_into_groups.R")
source('helper_fxns/prep_trial_levels_for_plot.R')
source("helper_fxns/split_trial_type.R")
# Standard error of the mean, ignoring missing values.
se <- function(x) {
  sd(x, na.rm = TRUE) / sqrt(sum(!is.na(x)))
}
# Classifier information: per-subject cross-validation accuracy, tuned SVM
# cost (C) values, and the number of trials of each type per subject
clf_acc <- read.csv('data/MVPA/HPC_unsmoothed/clf_acc.csv', header=FALSE)
best_c <- read.csv('data/MVPA/HPC_unsmoothed/best_C.csv', header=FALSE)
trial_acc <- read.csv("data/MVPA/HPC_unsmoothed/all_suj_acc.csv", header = FALSE)
colnames(trial_acc) <- c('low correct', 'low incorrect', 'high correct', 'high incorrect')
# Averages from condition "templates" (all trials in a condition averaged into
# a prototype before classification); rows = subjects, columns V1-V14 = TRs
averages_from_template <- list(high_correct = read.csv('data/MVPA/HPC_unsmoothed/all_suj_high_correct_avg.csv',header=FALSE),
high_incorrect = read.csv('data/MVPA/HPC_unsmoothed/all_suj_high_incorrect_avg.csv',header=FALSE),
low_correct = read.csv('data/MVPA/HPC_unsmoothed/all_suj_low_correct_avg.csv',header=FALSE),
low_incorrect = read.csv('data/MVPA/HPC_unsmoothed/all_suj_low_incorrect_avg.csv',header=FALSE))
# Only want to look at subjects who have at least 4 low load incorrect trials
# for those analyses; their rows are set to NA (not dropped) so row order holds
min_low_incorrect <- which(trial_acc$`low incorrect` < 4)
averages_from_template[["low_incorrect"]][min_low_incorrect, 1:14] <- NA
# Correct-minus-incorrect difference time course at each load
averages_from_template[["high_load_correct_diff"]] <- averages_from_template[["high_correct"]][,1:14] - averages_from_template[["high_incorrect"]][,1:14]
averages_from_template[["low_load_correct_diff"]] <- averages_from_template[["low_correct"]][,1:14] - averages_from_template[["low_incorrect"]][,1:14]
# Per-TR face probabilities averaged over individual trials (not templates)
individual_trial_averages_probs <- list(
high_correct = read.csv('data/MVPA/HPC_unsmoothed/all_suj_high_correct_indiv_avg_probs.csv',header=FALSE),
high_incorrect = read.csv('data/MVPA/HPC_unsmoothed/all_suj_high_incorrect_indiv_avg_probs.csv',header=FALSE),
low_correct = read.csv('data/MVPA/HPC_unsmoothed/all_suj_low_correct_indiv_avg_probs.csv',header=FALSE),
low_incorrect = read.csv('data/MVPA/HPC_unsmoothed/all_suj_low_incorrect_indiv_avg_probs.csv',header=FALSE))
# Apply the same minimum-trial-count exclusion and compute difference scores
individual_trial_averages_probs[["low_incorrect"]][min_low_incorrect, 1:14] <- NA
individual_trial_averages_probs[["high_load_correct_diff"]] <- individual_trial_averages_probs[["high_correct"]][,1:14] - individual_trial_averages_probs[["high_incorrect"]][,1:14]
individual_trial_averages_probs[["low_load_correct_diff"]] <- individual_trial_averages_probs[["low_correct"]][,1:14] - individual_trial_averages_probs[["low_incorrect"]][,1:14]
# Binary face predictions (0/1 per TR) averaged over individual trials
individual_trial_averages_preds <- list(
high_correct = read.csv('data/MVPA/HPC_unsmoothed/all_suj_high_correct_indiv_avg_preds.csv',header=FALSE),
high_incorrect = read.csv('data/MVPA/HPC_unsmoothed/all_suj_high_incorrect_indiv_avg_preds.csv',header=FALSE),
low_correct = read.csv('data/MVPA/HPC_unsmoothed/all_suj_low_correct_indiv_avg_preds.csv',header=FALSE),
low_incorrect = read.csv('data/MVPA/HPC_unsmoothed/all_suj_low_incorrect_indiv_avg_preds.csv',header=FALSE))
# Same minimum-trial-count exclusion and difference scores as above
individual_trial_averages_preds[["low_incorrect"]][min_low_incorrect, 1:14] <- NA
individual_trial_averages_preds[["high_load_correct_diff"]] <- individual_trial_averages_preds[["high_correct"]][,1:14] - individual_trial_averages_preds[["high_incorrect"]][,1:14]
individual_trial_averages_preds[["low_load_correct_diff"]] <- individual_trial_averages_preds[["low_correct"]][,1:14] - individual_trial_averages_preds[["low_incorrect"]][,1:14]
# Rebuild each list: drop row 10 (one subject excluded -- the c(1:9,11:170)
# indexing recurs throughout this script; confirm against the subject log),
# convert NaN to NA column by column, and attach subject IDs (PTID).
averages_from_template2 <- list()
indiv_probs <- list()
indiv_preds <- list()
for (i in seq.int(1,6)){
averages_from_template2[[names(averages_from_template)[i]]] <- averages_from_template[[i]][c(1:9,11:170),]
indiv_preds[[names(averages_from_template)[i]]] <- individual_trial_averages_preds[[i]][c(1:9,11:170),]
indiv_probs[[names(averages_from_template)[i]]] <- individual_trial_averages_probs[[i]][c(1:9,11:170),]
# Scan the 14 TR columns and replace NaN entries with NA
for (ii in seq.int(1,14)){
averages_from_template2[[i]][is.nan(averages_from_template2[[i]][,ii]),ii] <- NA
indiv_probs[[i]][is.nan(indiv_probs[[i]][,ii]),ii] <- NA
indiv_preds[[i]][is.nan(indiv_preds[[i]][,ii]),ii] <- NA
}
averages_from_template2[[i]]$PTID <- constructs_fMRI$PTID[c(1:9,11:170)]
indiv_probs[[i]]$PTID <- constructs_fMRI$PTID[c(1:9,11:170)]
indiv_preds[[i]]$PTID <- constructs_fMRI$PTID[c(1:9,11:170)]
}
# Replace the originals with the cleaned versions and persist for reuse
averages_from_template <- averages_from_template2
individual_trial_averages_preds <- indiv_preds
individual_trial_averages_probs <- indiv_probs
rm(averages_from_template2)
rm(indiv_preds)
rm(indiv_probs)
save(list=c("clf_acc", "best_c", "averages_from_template", "individual_trial_averages_probs","individual_trial_averages_preds"), file = "data/MVPA_HPC_unsmoothed.RData")
On average, we were able to classify faces with 42.2% accuracy (statistically significantly different from chance = 0.33). The classifier was trained on data from an independent FFA localizer. Data was extracted from the bilateral hippocampus. From that mask, the top 100 voxels based on the faces vs objects contrast in the overall subject GLM were selected as features for the classifier. The data used to train the classifier were shifted by 4.5 seconds to account for the hemodynamic delay.
A linear SVM classifier was used; the localizer task was split into 6 blocks of stimuli. These blocks were used in a pre-defined split for cross validation, where one block of each stimulus type was held out as a test set. Data were normalized within the training and test sets separately. Within this training set, another cross validation process was repeated to tune the cost of the model over the values [0.01, 0.1, 1, 10]. The best value of the cost function was used for each cross validation to score the classifier on the test set. The best classifier was also used to predict face presence at each TR during the DFR task.
We can see that there are a number of subjects for whom the classifier works below chance - we’ll remove those from the analysis
# Mean cross-validation accuracy per subject, tested against chance (1/3)
clf_acc$average <- rowMeans(clf_acc, na.rm = TRUE)
t.test(clf_acc$average,mu=0.33)
##
## One Sample t-test
##
## data: clf_acc$average
## t = 13.465, df = 168, p-value < 2.2e-16
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
## 0.4090651 0.4362321
## sample estimates:
## mean of x
## 0.4226486
# Distribution of per-subject classifier accuracy; dotted line marks chance
ggplot(data = clf_acc, aes(x = average))+
geom_histogram()+
geom_vline(aes(xintercept=0.33), linetype="dotted")+
theme_classic()+
xlab("Average classifier accuracy")
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## Warning: Removed 1 rows containing non-finite values (stat_bin).
# Exclude subjects whose average classifier accuracy falls below chance (1/3):
# blank their rows across all six trial-type entries of each result list.
below_chance_subjs <- which(clf_acc$average < 0.33)
for (cond in seq_len(6)) {
  averages_from_template[[cond]][below_chance_subjs, ] <- NA
  individual_trial_averages_preds[[cond]][below_chance_subjs, ] <- NA
  individual_trial_averages_probs[[cond]][below_chance_subjs, ] <- NA
}
# Reshape each list into long (subject x condition x TR) format for plotting
template_preds_melt <- prep_trial_levels_for_plot(averages_from_template)
## Using level as id variables
individual_trial_probs_melt <- prep_trial_levels_for_plot(individual_trial_averages_probs)
## Using level as id variables
individual_trial_preds_melt <- prep_trial_levels_for_plot(individual_trial_averages_preds)
## Using level as id variables
The shape of the time course is different here than it was for the fusiform region - here, we’re well below chance for encoding, but start to see a significant probability during delay (starting around TR 8) and the probe.
Here, we’re seeing a similar pattern to the fusiform, where we see peaks of decoding accuracy around the encoding period and then probe period. However, unlike the fusiform, we’re also seeing above chance accuracy for all trial types during the delay period. We also see that during encoding, high load trials (regardless of accuracy) show a higher probability of having a face decoded than low load trials. There are no differences between trial types, however, during probe.
# Face-probability time course by trial type; ribbons = +/- 1 SE,
# dotted line = chance (1/3)
ggplot(data=individual_trial_probs_melt %>% filter(level %in% c("high_correct", "high_incorrect", "low_correct", "low_incorrect")),aes(x=TR,y=value))+
geom_line(aes(color=level))+
geom_line(aes(x=TR,y=0.33),color="black",linetype="dotted")+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ylab("Probability of classifier predicting a face")+
theme_classic()
# Delay period (TR 8) vs chance: high-load correct trials
t.test(individual_trial_averages_probs[["high_correct"]]$V8,mu=0.33)
##
## One Sample t-test
##
## data: individual_trial_averages_probs[["high_correct"]]$V8
## t = 5.694, df = 148, p-value = 6.462e-08
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
## 0.3551646 0.3819152
## sample estimates:
## mean of x
## 0.3685399
# Delay period (TR 8) vs chance: high-load incorrect trials
t.test(individual_trial_averages_probs[["high_incorrect"]]$V8,mu=0.33)
##
## One Sample t-test
##
## data: individual_trial_averages_probs[["high_incorrect"]]$V8
## t = 3.4557, df = 148, p-value = 0.0007164
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
## 0.3475239 0.3943328
## sample estimates:
## mean of x
## 0.3709284
# Delay period (TR 8) vs chance: low-load correct trials
t.test(individual_trial_averages_probs[["low_correct"]]$V8,mu=0.33)
##
## One Sample t-test
##
## data: individual_trial_averages_probs[["low_correct"]]$V8
## t = 7.067, df = 148, p-value = 5.786e-11
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
## 0.3600353 0.3833528
## sample estimates:
## mean of x
## 0.3716941
# Delay period (TR 8) vs chance: low-load incorrect trials (small n after exclusions)
t.test(individual_trial_averages_probs[["low_incorrect"]]$V8,mu=0.33)
##
## One Sample t-test
##
## data: individual_trial_averages_probs[["low_incorrect"]]$V8
## t = 1.6675, df = 31, p-value = 0.1055
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
## 0.3203119 0.4265338
## sample estimates:
## mean of x
## 0.3734228
# Encoding period (TR 6): average within load (collapsing over accuracy),
# then compare high vs low load within subjects
encoding_level_avg <- data.frame(high = rowMeans(cbind(individual_trial_averages_probs[["high_correct"]]$V6, individual_trial_averages_probs[["high_incorrect"]]$V6), na.rm=TRUE), low = rowMeans(cbind(individual_trial_averages_probs[["low_correct"]]$V6, individual_trial_averages_probs[["low_incorrect"]]$V6),na.rm=TRUE))
t.test(encoding_level_avg$high,encoding_level_avg$low,paired=TRUE)
##
## Paired t-test
##
## data: encoding_level_avg$high and encoding_level_avg$low
## t = 2.3117, df = 148, p-value = 0.02218
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## 0.003056925 0.039064483
## sample estimates:
## mean of the differences
## 0.0210607
# Encoding period (TR 6): average within accuracy (collapsing over load),
# then compare correct vs incorrect within subjects
encoding_acc_avg <- data.frame(correct = rowMeans(cbind(individual_trial_averages_probs[["high_correct"]]$V6, individual_trial_averages_probs[["low_correct"]]$V6), na.rm=TRUE), incorrect = rowMeans(cbind(individual_trial_averages_probs[["low_incorrect"]]$V6, individual_trial_averages_probs[["high_incorrect"]]$V6),na.rm=TRUE))
t.test(encoding_acc_avg$correct,encoding_acc_avg$incorrect,paired=TRUE)
##
## Paired t-test
##
## data: encoding_acc_avg$correct and encoding_acc_avg$incorrect
## t = -0.57533, df = 148, p-value = 0.5659
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -0.03251028 0.01784875
## sample estimates:
## mean of the differences
## -0.007330767
# Probe period (TR 11) probabilities per trial type, reshaped to long format
probe_data_indiv <- data.frame(high_correct=individual_trial_averages_probs[["high_correct"]]$V11,
high_incorrect = individual_trial_averages_probs[["high_incorrect"]]$V11,
low_correct = individual_trial_averages_probs[["low_correct"]]$V11,
low_incorrect = individual_trial_averages_probs[["low_incorrect"]]$V11)
probe_data_indiv <- melt(probe_data_indiv)
## No id variables; using all as measure variables
# One-way ANOVA across the four trial types, with Tukey post-hoc comparisons
probe.aov <- aov(value ~ variable, data = probe_data_indiv)
summary(probe.aov)
## Df Sum Sq Mean Sq F value Pr(>F)
## variable 3 0.038 0.01258 0.972 0.406
## Residuals 475 6.150 0.01295
## 197 observations deleted due to missingness
TukeyHSD(probe.aov)
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = value ~ variable, data = probe_data_indiv)
##
## $variable
## diff lwr upr p adj
## high_incorrect-high_correct -0.006098413 -0.04008498 0.02788816 0.9671240
## low_correct-high_correct -0.010263140 -0.04424971 0.02372343 0.8641721
## low_incorrect-high_correct -0.037046745 -0.09420206 0.02010857 0.3401774
## low_correct-high_incorrect -0.004164727 -0.03815130 0.02982184 0.9890740
## low_incorrect-high_incorrect -0.030948332 -0.08810365 0.02620699 0.5024478
## low_incorrect-low_correct -0.026783605 -0.08393892 0.03037171 0.6219639
It seems like there’s really not much difference between correct and incorrect trials in the hippocampus.
# Correct-minus-incorrect difference time courses per load;
# dotted line at 0 = no difference
ggplot(data=individual_trial_probs_melt %>% filter(level %in% c("low_load_correct_diff","high_load_correct_diff")),aes(x=TR,y=value))+
geom_line(aes(x=TR,y=0), linetype="dotted")+
geom_line(aes(color=level))+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ylab("Correct - Incorrect Diff in Probability of Classifying Faces")+
theme_classic()
In the templates, we see a similar structure as in the individual trials with peaks around encoding and probe, though there is below chance decoding during delay period. There are no differences between trial types during probe.
# Template-based face-probability time course by trial type;
# ribbons = +/- 1 SE, dotted line = chance (1/3)
ggplot(data=template_preds_melt%>% filter(level %in% c("high_correct", "high_incorrect", "low_correct", "low_incorrect")),aes(x=TR,y=value))+
geom_line(aes(color=level))+
geom_line(aes(x=TR,y=0.33),color="black",linetype="dotted")+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ylab("Probability of classifier predicting a face")+
theme_classic()
# Template probe (TR 10): correct vs incorrect averaged over load.
# NOTE(review): rowMeans has no na.rm here, so subjects NA'd for low-load
# incorrect drop out of the comparison entirely (hence df = 31) -- confirm
# this restriction is intended.
acc_data_probe <- data.frame(correct = rowMeans(cbind(averages_from_template[["high_correct"]]$V10,averages_from_template[["low_correct"]]$V10)),
incorrect = rowMeans(cbind(averages_from_template[["high_incorrect"]]$V10, averages_from_template[["low_incorrect"]]$V10)))
t.test(acc_data_probe$correct,acc_data_probe$incorrect, paired=TRUE)
##
## Paired t-test
##
## data: acc_data_probe$correct and acc_data_probe$incorrect
## t = 0.27851, df = 31, p-value = 0.7825
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -0.08232879 0.10837046
## sample estimates:
## mean of the differences
## 0.01302083
# Template late probe (TR 11): correct vs incorrect averaged over load.
# NOTE(review): same missing na.rm as the TR 10 comparison -- subjects NA'd
# for low-load incorrect are excluded from both columns (df = 31).
acc_data_late_probe <- data.frame(correct = rowMeans(cbind(averages_from_template[["high_correct"]]$V11,averages_from_template[["low_correct"]]$V11)),
incorrect = rowMeans(cbind(averages_from_template[["high_incorrect"]]$V11, averages_from_template[["low_incorrect"]]$V11)))
t.test(acc_data_late_probe$correct,acc_data_late_probe$incorrect, paired=TRUE)
##
## Paired t-test
##
## data: acc_data_late_probe$correct and acc_data_late_probe$incorrect
## t = -0.97535, df = 31, p-value = 0.3369
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -0.17709196 0.06250863
## sample estimates:
## mean of the differences
## -0.05729167
# Template probe-period (TR 11) probabilities per trial type, long format
probe_data_template <- data.frame(high_correct=averages_from_template[["high_correct"]]$V11,
high_incorrect = averages_from_template[["high_incorrect"]]$V11,
low_correct = averages_from_template[["low_correct"]]$V11,
low_incorrect = averages_from_template[["low_incorrect"]]$V11)
probe_data_template <- melt(probe_data_template)
## No id variables; using all as measure variables
# One-way ANOVA across trial types, with Tukey post-hoc comparisons
probe.aov <- aov(value ~ variable, data = probe_data_template)
summary(probe.aov)
## Df Sum Sq Mean Sq F value Pr(>F)
## variable 3 0.53 0.1768 1.197 0.31
## Residuals 475 70.13 0.1476
## 197 observations deleted due to missingness
TukeyHSD(probe.aov)
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = value ~ variable, data = probe_data_template)
##
## $variable
## diff lwr upr p adj
## high_incorrect-high_correct -0.07382550 -0.1885948 0.04094380 0.3470572
## low_correct-high_correct -0.06040268 -0.1751720 0.05436662 0.5271580
## low_incorrect-high_correct -0.09298098 -0.2859889 0.10002689 0.6003969
## low_correct-high_incorrect 0.01342282 -0.1013465 0.12819212 0.9904693
## low_incorrect-high_incorrect -0.01915548 -0.2121634 0.17385240 0.9941201
## low_incorrect-low_correct -0.03257830 -0.2255862 0.16042958 0.9723827
Unlike in the other regions, there is no difference in the overall probability of predicting a face from the template vs individual trials.
# Compare overall probe-period (TR 10) face probability between template-based
# and individual-trial-based classification.
# NOTE(review): low_incorrect is omitted from both averages -- presumably due
# to the small-n exclusions for that condition; confirm intentional.
compare_across_temp_indiv <- data.frame(template = rowMeans(cbind(averages_from_template[["high_correct"]]$V10,
averages_from_template[["high_incorrect"]]$V10,
averages_from_template[["low_correct"]]$V10)),
indiv = rowMeans(cbind(individual_trial_averages_probs[["high_correct"]]$V10,
individual_trial_averages_probs[["high_incorrect"]]$V10,
individual_trial_averages_probs[["low_correct"]]$V10)))
# Non-parametric paired comparison (Wilcoxon signed rank)
wilcox.test(compare_across_temp_indiv$template, compare_across_temp_indiv$indiv,paired=TRUE)
##
## Wilcoxon signed rank test with continuity correction
##
## data: compare_across_temp_indiv$template and compare_across_temp_indiv$indiv
## V = 5124, p-value = 0.3803
## alternative hypothesis: true location shift is not equal to 0
Similar to before, there aren’t any differences between correct and incorrect trials from the template.
# Template correct-minus-incorrect difference time courses per load;
# dotted line at 0 = no difference
ggplot(data=template_preds_melt %>% filter(level %in% c("low_load_correct_diff","high_load_correct_diff")),aes(x=TR,y=value))+
geom_line(aes(color=level))+
geom_line(aes(x=TR,y=0), linetype="dotted")+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ylab("Correct - Incorrect Diff in Probability of Classifying Faces")+
theme_classic()
# High-load template: correct vs incorrect at encoding (TR 6) and probe (TR 11)
encoding_correct_diff_high <- data.frame(correct=averages_from_template[["high_correct"]]$V6, incorrect=averages_from_template[["high_incorrect"]]$V6)
probe_correct_diff_high <- data.frame(correct=averages_from_template[["high_correct"]]$V11, incorrect=averages_from_template[["high_incorrect"]]$V11)
t.test(encoding_correct_diff_high$correct, encoding_correct_diff_high$incorrect, paired=TRUE)
##
## Paired t-test
##
## data: encoding_correct_diff_high$correct and encoding_correct_diff_high$incorrect
## t = 1.2317, df = 148, p-value = 0.22
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -0.02906957 0.12526644
## sample estimates:
## mean of the differences
## 0.04809843
t.test(probe_correct_diff_high$correct, probe_correct_diff_high$incorrect, paired=TRUE)
##
## Paired t-test
##
## data: probe_correct_diff_high$correct and probe_correct_diff_high$incorrect
## t = 1.7293, df = 148, p-value = 0.08583
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -0.0105347 0.1581857
## sample estimates:
## mean of the differences
## 0.0738255
# Split each measure by working-memory capacity group (low / medium / high)
split_template <- split_trial_type(averages_from_template,WM_groups)
split_indiv_probs <- split_trial_type(individual_trial_averages_probs, WM_groups)
split_indiv_preds <- split_trial_type(individual_trial_averages_preds, WM_groups)
In high load incorrect trials, we see that medium capacity subjects show greater face classification than both low and high capacity subjects during encoding, and greater than low capacity subjects during low load correct trials.
# Per-condition time courses of face probability by WM capacity group
# (ribbons = +/- 1 SE, dotted line = chance at 1/3), one panel per condition.
indiv_avgs <- lapply(seq.int(1, 4), function(cond) {
  ggplot(data = split_indiv_probs[["avgs"]][[cond]][["all"]]) +
    geom_line(aes(x = TR, y = mean, color = group)) +
    geom_ribbon(aes(x = TR, ymin = se_min, ymax = se_max, fill = group), alpha = 0.2) +
    geom_line(aes(x = TR, y = 0.33), color = "black", linetype = "dotted") +
    scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
    ggtitle(names(split_indiv_probs[["avgs"]])[cond]) +
    ylab("Probability") +
    theme_classic()
})
# Arrange the four panels in a 2 x 2 grid with one shared legend
(indiv_avgs[[1]] + indiv_avgs[[2]]) / (indiv_avgs[[3]] + indiv_avgs[[4]]) +
  plot_layout(guides = "collect") +
  plot_annotation(title="Probability of classifier predicting a face from individual trials")
# Encoding-period (TR 6 = column 6) comparison across WM capacity groups for
# each trial type; column 16 holds the capacity-group factor. Note the loop
# variable name appears verbatim in the printed aov formulas below.
print("encoding")
## [1] "encoding"
for (trial_type in seq.int(1,4)){
print(names(split_indiv_probs[["all_data"]])[trial_type])
# One-way ANOVA of encoding probability by group, with Tukey post-hoc tests
temp.aov <- aov(split_indiv_probs[["all_data"]][[trial_type]][["all"]][,6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][,16])
print(summary(temp.aov))
print(TukeyHSD(temp.aov))
}
## [1] "high_correct"
## Df Sum Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 2 0.0074
## Residuals 144 1.6762
## Mean Sq F value
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 0.003724 0.32
## Residuals 0.011640
## Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 0.727
## Residuals
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.005804459 -0.04529620 0.05690512 0.9609070
## low-high -0.011471636 -0.06338133 0.04043806 0.8600523
## low-med -0.017276096 -0.06918579 0.03463360 0.7108993
##
## [1] "high_incorrect"
## Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 2 0.301 0.1507
## Residuals 144 3.268 0.0227
## F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 6.638 0.00175 **
## Residuals
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.091014384 0.01965781 0.16237096 0.0083452
## low-high -0.008782202 -0.08126850 0.06370410 0.9556479
## low-med -0.099796586 -0.17228289 -0.02731029 0.0039459
##
## [1] "low_correct"
## Df Sum Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 2 0.0403
## Residuals 144 0.8345
## Mean Sq F value
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 0.020159 3.478
## Residuals 0.005795
## Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 0.0335 *
## Residuals
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.02489788 -0.01115898 0.060954738 0.2341946
## low-high -0.01541303 -0.05204074 0.021214690 0.5802367
## low-med -0.04031090 -0.07693862 -0.003683188 0.0271625
##
## [1] "low_incorrect"
## Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 2 0.0588 0.0294
## Residuals 29 0.6467 0.0223
## F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 1.319 0.283
## Residuals
## 115 observations deleted due to missingness
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 6] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high -0.04081788 -0.21217773 0.1305420 0.8274518
## low-high 0.05772474 -0.10799208 0.2234416 0.6691568
## low-med 0.09854262 -0.05253885 0.2496241 0.2572780
There are no group differences at encoding from the template.
# Group-wise time courses from the condition templates, one panel per
# condition (ribbons = +/- 1 SE, dotted line = chance at 1/3).
template_avgs <- lapply(seq.int(1, 4), function(cond) {
  ggplot(data = split_template[["avgs"]][[cond]][["all"]]) +
    geom_line(aes(x = TR, y = mean, color = group)) +
    geom_ribbon(aes(x = TR, ymin = se_min, ymax = se_max, fill = group), alpha = 0.2) +
    geom_line(aes(x = TR, y = 0.33), color = "black", linetype = "dotted") +
    scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
    ggtitle(names(split_template[["avgs"]])[cond]) +
    ylab("Probability") +
    theme_classic()
})
# 2 x 2 layout with a single shared legend
(template_avgs[[1]] + template_avgs[[2]]) / (template_avgs[[3]] + template_avgs[[4]]) +
  plot_layout(guides = "collect") +
  plot_annotation(title="Probability of classifier predicting a face from trial templates")
# Same group comparison at encoding (column 6) using the template-based
# probabilities; column 16 is the capacity-group factor. The loop variable
# name appears verbatim in the printed aov formulas below.
for (trial_type in seq.int(1,4)){
print(names(split_template[["all_data"]])[trial_type])
# One-way ANOVA of encoding probability by group, with Tukey post-hoc tests
temp.aov <- aov(split_template[["all_data"]][[trial_type]][["all"]][,6] ~ split_template[["all_data"]][[trial_type]][["all"]][,16])
print(summary(temp.aov))
print(TukeyHSD(temp.aov))
}
## [1] "high_correct"
## Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 2 0.163 0.08131
## Residuals 144 20.961 0.14556
## F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 0.559 0.573
## Residuals
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 6] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.02666667 -0.1540409 0.2073742 0.9349292
## low-high -0.05397163 -0.2375402 0.1295969 0.7660405
## low-med -0.08063830 -0.2642068 0.1029302 0.5527402
##
## [1] "high_incorrect"
## Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 2 0.636 0.3182
## Residuals 144 20.135 0.1398
## F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 2.276 0.106
## Residuals
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 6] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.08333333 -0.09377424 0.26044091 0.5068376
## low-high -0.07865248 -0.25856405 0.10125908 0.5558680
## low-med -0.16198582 -0.34189738 0.01792575 0.0869485
##
## [1] "low_correct"
## Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 2 0.522 0.2610
## Residuals 144 21.408 0.1487
## F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 1.756 0.176
## Residuals
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 6] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.126666667 -0.05595477 0.30928810 0.2312154
## low-high 0.001843972 -0.18366875 0.18735669 0.9996946
## low-med -0.124822695 -0.31033542 0.06069003 0.2517451
##
## [1] "low_incorrect"
## Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 2 0.265 0.1326
## Residuals 29 4.234 0.1460
## F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 0.908 0.414
## Residuals
## 115 observations deleted due to missingness
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 6] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.16856061 -0.2699113 0.6070325 0.6140459
## low-high 0.22916667 -0.1948660 0.6531993 0.3879239
## low-med 0.06060606 -0.3259780 0.4471901 0.9209180
If we look averaged over time, we see a significant positive correlation with omnibus span and the difference between classification at correct and incorrect low load trials. We also see significant negative correlation between high load accuracy and decoding at low load incorrect trials, and between BPRS and classification at high load incorrect trials, and a positive correlation with the difference between correct and incorrect at high load trials.
# Average face probability over the whole trial (TRs 1-14) per subject and
# condition; na.rm only for low_incorrect, where excluded subjects are all-NA
indiv_avg_over_time <- data.frame(high_correct = rowMeans(individual_trial_averages_probs[["high_correct"]][,1:14]),
high_incorrect = rowMeans(individual_trial_averages_probs[["high_incorrect"]][,1:14]),
low_correct = rowMeans(individual_trial_averages_probs[["low_correct"]][,1:14]),
low_incorrect = rowMeans(individual_trial_averages_probs[["low_incorrect"]][,1:14],na.rm=TRUE),
high_load_diff_correct = rowMeans(individual_trial_averages_probs[["high_load_correct_diff"]][,1:14]),
low_load_diff_correct = rowMeans(individual_trial_averages_probs[["low_load_correct_diff"]][,1:14]))
# Normalizes NaN (all-NA rows under na.rm=TRUE) to NA; a no-op for NA cells
indiv_avg_over_time[is.na(indiv_avg_over_time)] <- NA
# Attach span scores and IDs (c(1:9,11:170) matches the earlier row drop),
# then merge in symptom (BPRS) and DFR accuracy measures by subject ID
indiv_avg_over_time <- data.frame(indiv_avg_over_time, omnibus_span = constructs_fMRI$omnibus_span_no_DFR_MRI[c(1:9,11:170)], PTID = constructs_fMRI$PTID[c(1:9,11:170)])
indiv_avg_over_time <- merge(indiv_avg_over_time, p200_data[,c("PTID","BPRS_TOT","XDFR_MRI_ACC_L3", "XDFR_MRI_ACC_L1")],by="PTID")
# Scatter plots of each probability measure (columns 2-7) against omnibus
# span, high-load DFR accuracy, and BPRS, each with a linear fit
plot_list <- list()
for (i in seq.int(1,6)){
# Column i+1 is the i-th probability measure; columns 8:11 are behavioral
plot_data <- indiv_avg_over_time[,c(i+1,8:11)]
colnames(plot_data)[1] <- "prob"
plot_list[["omnibus"]][[i]] <- ggplot(data = plot_data,aes(y=prob,x=omnibus_span))+
geom_point()+
stat_smooth(method="lm")+
xlab("Omnibus Span")+
ggtitle(colnames(indiv_avg_over_time)[i+1])+
theme_classic()
plot_list[["DFR_Acc"]][[i]] <- ggplot(data = plot_data,aes(y=prob,x=XDFR_MRI_ACC_L3))+
geom_point()+
stat_smooth(method="lm")+
xlab("DFR High Load Acc")+
ggtitle(colnames(indiv_avg_over_time)[i+1])+
theme_classic()
plot_list[["BPRS"]][[i]] <- ggplot(data = plot_data,aes(y=prob,x=BPRS_TOT))+
geom_point()+
stat_smooth(method="lm")+
xlab("BPRS")+
ggtitle(colnames(indiv_avg_over_time)[i+1])+
theme_classic()
}
# Four single-condition panels vs omnibus span
(plot_list[["omnibus"]][[1]] + plot_list[["omnibus"]][[2]]) /
(plot_list[["omnibus"]][[3]] + plot_list[["omnibus"]][[4]]) +
plot_annotation(title="Correlation of face probability and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 137 rows containing non-finite values (stat_smooth).
## Warning: Removed 137 rows containing missing values (geom_point).
(plot_list[["omnibus"]][[5]] + plot_list[["omnibus"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 137 rows containing non-finite values (stat_smooth).
## Warning: Removed 137 rows containing missing values (geom_point).
(plot_list[["DFR_Acc"]][[1]] + plot_list[["DFR_Acc"]][[2]]) /
(plot_list[["DFR_Acc"]][[3]] + plot_list[["DFR_Acc"]][[4]]) +
plot_annotation(title="Correlation of face probability and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 137 rows containing non-finite values (stat_smooth).
## Warning: Removed 137 rows containing missing values (geom_point).
(plot_list[["DFR_Acc"]][[5]] + plot_list[["DFR_Acc"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 137 rows containing non-finite values (stat_smooth).
## Warning: Removed 137 rows containing missing values (geom_point).
(plot_list[["BPRS"]][[1]] + plot_list[["BPRS"]][[2]]) /
(plot_list[["BPRS"]][[3]] + plot_list[["BPRS"]][[4]]) +
plot_annotation(title="Correlation of face probability and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 137 rows containing non-finite values (stat_smooth).
## Warning: Removed 137 rows containing missing values (geom_point).
(plot_list[["BPRS"]][[5]] + plot_list[["BPRS"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 137 rows containing non-finite values (stat_smooth).
## Warning: Removed 137 rows containing missing values (geom_point).
# Follow-up tests on the time-averaged probabilities.
# Low-load incorrect vs span: trend-level negative (r = -.33, p = .063).
cor.test(indiv_avg_over_time$low_incorrect, indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$low_incorrect and indiv_avg_over_time$omnibus_span
## t = -1.9343, df = 30, p-value = 0.06256
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.61077327 0.01775668
## sample estimates:
## cor
## -0.3329991
# Low-load correct-minus-incorrect vs span: significant positive
# (r = .45, p = .010).
cor.test(indiv_avg_over_time$low_load_diff_correct, indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$low_load_diff_correct and indiv_avg_over_time$omnibus_span
## t = 2.7378, df = 30, p-value = 0.0103
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.1165930 0.6884683
## sample estimates:
## cor
## 0.4471096
# Low-load incorrect vs high-load accuracy: significant negative
# (r = -.49, p = .0048).
cor.test(indiv_avg_over_time$low_incorrect, indiv_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$low_incorrect and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = -3.0465, df = 30, p-value = 0.004793
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.7137941 -0.1654301
## sample estimates:
## cor
## -0.4860845
# Low-load difference vs high-load accuracy: trend-level positive
# (r = .32, p = .071).
cor.test(indiv_avg_over_time$low_load_diff_correct, indiv_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$low_load_diff_correct and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = 1.8727, df = 30, p-value = 0.07088
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.02837231 0.60407091
## sample estimates:
## cor
## 0.3235224
# High-load incorrect vs BPRS: significant negative (r = -.23, p = .0044).
cor.test(indiv_avg_over_time$high_incorrect, indiv_avg_over_time$BPRS_TOT)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$high_incorrect and indiv_avg_over_time$BPRS_TOT
## t = -2.8908, df = 147, p-value = 0.004426
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.3786085 -0.0738839
## sample estimates:
## cor
## -0.2319284
# High-load difference vs BPRS: significant positive (r = .23, p = .0046).
cor.test(indiv_avg_over_time$high_load_diff_correct, indiv_avg_over_time$BPRS_TOT)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$high_load_diff_correct and indiv_avg_over_time$BPRS_TOT
## t = 2.8789, df = 147, p-value = 0.004587
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.07293081 0.37778733
## sample estimates:
## cor
## 0.2310215
# Low-load difference vs BPRS: not significant (r = -.26, p = .15).
cor.test(indiv_avg_over_time$low_load_diff_correct, indiv_avg_over_time$BPRS_TOT)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$low_load_diff_correct and indiv_avg_over_time$BPRS_TOT
## t = -1.4829, df = 30, p-value = 0.1485
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.55908295 0.09611656
## sample estimates:
## cor
## -0.2613362
If we look at the patterns over time, we can see that BPRS tends to be negatively related to classification during the encoding and probe periods. Correlation with accuracy is strongest during the encoding period. Span generally has a low correlation with classification probability.
Next step is to pull out some of these correlations and see if they’re significant.
# Assemble one table of behavioral measures per subject.
data_to_plot <- merge(constructs_fMRI,p200_data,by="PTID")
data_to_plot <- merge(data_to_plot,p200_clinical_zscores,by="PTID")
# Positional selection: rows c(1:9,11:170) drop one subject (as above);
# columns c(1,7,14,15,41,42) are PTID plus the behavioral measures used
# below -- TODO confirm these indices against the merged column layout.
data_to_plot <- data_to_plot[c(1:9,11:170),c(1,7,14,15,41,42)]
# Load effect on accuracy: high-load minus low-load accuracy.
data_to_plot$ACC_LE <- data_to_plot$XDFR_MRI_ACC_L3 - data_to_plot$XDFR_MRI_ACC_L1
# For each trial type, correlate the classification probability at every TR
# (1-14) with each behavioral measure (columns 2-5 of data_to_plot), then
# plot correlation-by-TR time courses.
corr_to_behav_plots <- list()
for (i in seq.int(1,6)){
measure_by_time <- data.frame(matrix(nrow=4,ncol=14))
for (measure in seq.int(2,5)){
for (TR in seq.int(1,14)){
# Positional [[i]] indexing assumes the list order matches the names
# used for titles below.
measure_by_time[measure-1,TR] <- cor(data_to_plot[,measure],individual_trial_averages_probs[[i]][,TR],use = "pairwise.complete.obs")
}
}
# Transpose to TR-by-measure so each measure becomes a line in the plot.
measure_by_time <- data.frame(t(measure_by_time))
measure_by_time$TR <- seq.int(1,14)
colnames(measure_by_time)[1:4] <- colnames(data_to_plot)[2:5]
melted_measure_by_time <- melt(measure_by_time,id.vars="TR")
corr_to_behav_plots[[names(individual_trial_averages_probs)[i]]] <- ggplot(data=melted_measure_by_time,aes(x=TR,y=value))+
geom_line(aes(color=variable))+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ggtitle(names(individual_trial_averages_probs)[i])+
theme_classic()
}
# Correlation-by-TR time courses for the four accuracy-x-load bins.
(corr_to_behav_plots[[1]] + corr_to_behav_plots[[2]]) / (corr_to_behav_plots[[3]] + corr_to_behav_plots[[4]])+
plot_layout(guides="collect")+
plot_annotation(title = "Correlation between face classification and behavioral measures")
# Same, for the two correct-minus-incorrect difference scores.
(corr_to_behav_plots[[5]] + corr_to_behav_plots[[6]])+
plot_layout(guides="collect")+
plot_annotation(title = "Correlation between difference across correctness in face classification and behavioral measures")
# Scatter plots of classification probability at single TRs against the
# behavioral measures. V6/V8/V11 are the TR-6/TR-8/TR-11 probability columns
# of individual_trial_averages_probs, used here as the cue, delay, and probe
# periods respectively (per the list names below) -- TODO confirm that TR
# assignment against the task timing.
# Note: this overwrites the earlier `plot_list` of time-averaged plots.
plot_list <- list()
for (trial_type in seq.int(1,6)){
temp_plot_data <- merge(p200_data, individual_trial_averages_probs[[trial_type]],by="PTID")
temp_plot_data <- merge(temp_plot_data,constructs_fMRI,by="PTID")
# Omnibus span vs probability at cue / delay / probe.
plot_list[["omnibus"]][["cue"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V6,x=omnibus_span_no_DFR_MRI))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
plot_list[["omnibus"]][["delay"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V8,x=omnibus_span_no_DFR_MRI))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
plot_list[["omnibus"]][["probe"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V11,x=omnibus_span_no_DFR_MRI))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
# Acc
plot_list[["L3_Acc"]][["cue"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V6,x=XDFR_MRI_ACC_L3))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
plot_list[["L3_Acc"]][["delay"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V8,x=XDFR_MRI_ACC_L3))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
plot_list[["L3_Acc"]][["probe"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V11,x=XDFR_MRI_ACC_L3))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
# BPRS
plot_list[["BPRS"]][["cue"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V6,x=BPRS_TOT))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
plot_list[["BPRS"]][["delay"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V8,x=BPRS_TOT))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
plot_list[["BPRS"]][["probe"]][[trial_type]] <- ggplot(data=temp_plot_data,aes(y=V11,x=BPRS_TOT))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(names(individual_trial_averages_probs)[trial_type])+
theme_classic()
}
There is a significant positive relationship between omnibus span and the difference in decoding probability in correct vs incorrect low load trials.
# Cue-period (TR 6) probability vs each behavioral measure.
(plot_list[["omnibus"]][["cue"]][[1]] + plot_list[["omnibus"]][["cue"]][[2]]) /
(plot_list[["omnibus"]][["cue"]][[3]] + plot_list[["omnibus"]][["cue"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["cue"]][[5]] + plot_list[["omnibus"]][["cue"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["cue"]][[1]] + plot_list[["L3_Acc"]][["cue"]][[2]]) /
(plot_list[["L3_Acc"]][["cue"]][[3]] + plot_list[["L3_Acc"]][["cue"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["cue"]][[5]] + plot_list[["L3_Acc"]][["cue"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["cue"]][[1]] + plot_list[["BPRS"]][["cue"]][[2]]) /
(plot_list[["BPRS"]][["cue"]][[3]] + plot_list[["BPRS"]][["cue"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["cue"]][[5]] + plot_list[["BPRS"]][["cue"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
# Cue-period (V6) follow-up tests.
# Low-load incorrect vs span: trend-level negative (r = -.30, p = .099).
cor.test(individual_trial_averages_probs[["low_incorrect"]]$V6,indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_incorrect"]]$V6 and indiv_avg_over_time$omnibus_span
## t = -1.7014, df = 30, p-value = 0.09921
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.58484710 0.05804885
## sample estimates:
## cor
## -0.2966497
# Low-load difference vs span: significant positive (r = .35, p = .048).
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V6,indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V6 and indiv_avg_over_time$omnibus_span
## t = 2.0621, df = 30, p-value = 0.04795
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.004159072 0.624331003
## sample estimates:
## cor
## 0.3523421
# Low-load incorrect vs high-load accuracy: not significant (p = .24).
cor.test(individual_trial_averages_probs[["low_incorrect"]]$V6,indiv_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_incorrect"]]$V6 and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = -1.2045, df = 30, p-value = 0.2378
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.5242154 0.1447535
## sample estimates:
## cor
## -0.2147815
# BPRS tests exclude one high-severity outlier (BPRS_TOT >= 70).
# High-load incorrect vs BPRS: not significant (p = .32).
cor.test(individual_trial_averages_probs[["high_incorrect"]]$V6[indiv_avg_over_time$BPRS_TOT < 70],indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_incorrect"]]$V6[indiv_avg_over_time$BPRS_TOT < 70] and indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70]
## t = -1.0062, df = 146, p-value = 0.316
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.24110234 0.07942052
## sample estimates:
## cor
## -0.08298671
# High-load difference vs BPRS (outlier excluded): not significant (p = .19).
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V6[indiv_avg_over_time$BPRS_TOT < 70],indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_load_correct_diff"]]$V6[indiv_avg_over_time$BPRS_TOT < 70] and indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70]
## t = 1.32, df = 146, p-value = 0.1889
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.05369009 0.26529014
## sample estimates:
## cor
## 0.1085945
# Low-load difference vs BPRS (outlier excluded): not significant (p = .12).
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V6[indiv_avg_over_time$BPRS_TOT < 70],indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V6[indiv_avg_over_time$BPRS_TOT < 70] and indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70]
## t = -1.5923, df = 29, p-value = 0.1222
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.57964759 0.07870185
## sample estimates:
## cor
## -0.2835456
There is a significant positive relationship between the difference between correct and incorrect trials at low load and span.
There is a significant negative relationship between classification accuracy at low load incorrect trials and accuracy, and a positive relationship between the difference between correct and incorrect trials at low load and accuracy.
There is also a trending correlation for the difference between correct and incorrect at high load trials and BPRS (excluding the outlier).
# Delay-period (TR 8) probability vs each behavioral measure.
(plot_list[["omnibus"]][["delay"]][[1]] + plot_list[["omnibus"]][["delay"]][[2]]) /
(plot_list[["omnibus"]][["delay"]][[3]] + plot_list[["omnibus"]][["delay"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["delay"]][[5]] + plot_list[["omnibus"]][["delay"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["delay"]][[1]] + plot_list[["L3_Acc"]][["delay"]][[2]]) /
(plot_list[["L3_Acc"]][["delay"]][[3]] + plot_list[["L3_Acc"]][["delay"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["delay"]][[5]] + plot_list[["L3_Acc"]][["delay"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["delay"]][[1]] + plot_list[["BPRS"]][["delay"]][[2]]) /
(plot_list[["BPRS"]][["delay"]][[3]] + plot_list[["BPRS"]][["delay"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["delay"]][[5]] + plot_list[["BPRS"]][["delay"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
# Delay-period (V8) follow-up tests against omnibus span.
# Low-load incorrect vs span: not significant (r = -.28, p = .12).
cor.test(individual_trial_averages_probs[["low_incorrect"]]$V8,indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_incorrect"]]$V8 and indiv_avg_over_time$omnibus_span
## t = -1.6197, df = 30, p-value = 0.1158
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.5753711 0.0722708
## sample estimates:
## cor
## -0.2835692
# Low-load difference vs span: significant positive (r = .38, p = .033).
# Fixed: this read `$omnibus`, which only resolved to the `omnibus_span`
# column through R's silent partial matching of data.frame `$` names --
# spell out the full column name.
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V8,indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V8 and indiv_avg_over_time$omnibus
## t = 2.2342, df = 30, p-value = 0.03307
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.03339802 0.64185545
## sample estimates:
## cor
## 0.3776935
# Delay-period (V8) tests against accuracy and BPRS.
# Low-load incorrect vs high-load accuracy: significant negative
# (r = -.47, p = .0064).
cor.test(individual_trial_averages_probs[["low_incorrect"]]$V8,indiv_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_incorrect"]]$V8 and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = -2.9308, df = 30, p-value = 0.00641
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.7045758 -0.1473371
## sample estimates:
## cor
## -0.4717925
# Low-load difference vs high-load accuracy: significant positive
# (r = .44, p = .013).
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V8,indiv_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V8 and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = 2.6472, df = 30, p-value = 0.01281
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.1019243 0.6805790
## sample estimates:
## cor
## 0.4351528
# BPRS tests exclude one high-severity outlier (BPRS_TOT >= 70).
# High-load incorrect vs BPRS: not significant (p = .15).
cor.test(individual_trial_averages_probs[["high_incorrect"]]$V8[indiv_avg_over_time$BPRS_TOT < 70],indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_incorrect"]]$V8[indiv_avg_over_time$BPRS_TOT < 70] and indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70]
## t = -1.4363, df = 146, p-value = 0.1531
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.27416187 0.04414516
## sample estimates:
## cor
## -0.1180394
# Low-load incorrect vs BPRS: not significant (p = .12).
cor.test(individual_trial_averages_probs[["low_incorrect"]]$V8[indiv_avg_over_time$BPRS_TOT < 70],indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_incorrect"]]$V8[indiv_avg_over_time$BPRS_TOT < 70] and indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70]
## t = 1.5944, df = 29, p-value = 0.1217
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.07832306 0.57990061
## sample estimates:
## cor
## 0.283896
# High-load difference vs BPRS: trend-level positive (p = .062).
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V8[indiv_avg_over_time$BPRS_TOT < 70],indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_load_correct_diff"]]$V8[indiv_avg_over_time$BPRS_TOT < 70] and indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70]
## t = 1.879, df = 146, p-value = 0.06224
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.007879278 0.307383084
## sample estimates:
## cor
## 0.1536599
# Low-load difference vs BPRS: trend-level negative (p = .053).
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V8[indiv_avg_over_time$BPRS_TOT < 70],indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V8[indiv_avg_over_time$BPRS_TOT < 70] and indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70]
## t = -2.0183, df = 29, p-value = 0.05289
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.627282934 0.003873038
## sample estimates:
## cor
## -0.3509487
Omnibus span is significantly positively correlated with the difference between correct/incorrect trials at low load.
Probability of classification at incorrect high load trials is significantly negatively correlated with BPRS at TR 11, as is the difference between correct and incorrect trials at high load.
# Probe-period (TR 11) probability vs each behavioral measure.
(plot_list[["omnibus"]][["probe"]][[1]] + plot_list[["omnibus"]][["probe"]][[2]]) /
(plot_list[["omnibus"]][["probe"]][[3]] + plot_list[["omnibus"]][["probe"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["probe"]][[5]] + plot_list[["omnibus"]][["probe"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["probe"]][[1]] + plot_list[["L3_Acc"]][["probe"]][[2]]) /
(plot_list[["L3_Acc"]][["probe"]][[3]] + plot_list[["L3_Acc"]][["probe"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["probe"]][[5]] + plot_list[["L3_Acc"]][["probe"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["probe"]][[1]] + plot_list[["BPRS"]][["probe"]][[2]]) /
(plot_list[["BPRS"]][["probe"]][[3]] + plot_list[["BPRS"]][["probe"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["probe"]][[5]] + plot_list[["BPRS"]][["probe"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
# Probe-period (V11) follow-up tests.
# Low-load correct vs span: not significant (p = .13).
cor.test(individual_trial_averages_probs[["low_correct"]]$V11,indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_correct"]]$V11 and indiv_avg_over_time$omnibus_span
## t = 1.5253, df = 147, p-value = 0.1293
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.03671472 0.28000207
## sample estimates:
## cor
## 0.1248221
# Low-load incorrect vs span: trend-level negative (p = .078).
cor.test(individual_trial_averages_probs[["low_incorrect"]]$V11,indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_incorrect"]]$V11 and indiv_avg_over_time$omnibus_span
## t = -1.8269, df = 30, p-value = 0.07767
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.59901786 0.03628242
## sample estimates:
## cor
## -0.3164147
# Low-load difference vs span: significant positive (r = .46, p = .0075).
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V11,indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V11 and indiv_avg_over_time$omnibus_span
## t = 2.8691, df = 30, p-value = 0.007471
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.1375765 0.6995235
## sample estimates:
## cor
## 0.4640109
# Low-load difference vs high-load accuracy: trend-level positive (p = .086).
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V11,indiv_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V11 and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = 1.7727, df = 30, p-value = 0.08643
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.04567025 0.59295445
## sample estimates:
## cor
## 0.3079274
# BPRS tests exclude one high-severity outlier (BPRS_TOT >= 70).
# High-load correct vs BPRS: not significant (p = .47).
cor.test(individual_trial_averages_probs[["high_correct"]]$V11[indiv_avg_over_time$BPRS_TOT < 70],indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_correct"]]$V11[indiv_avg_over_time$BPRS_TOT < 70] and indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70]
## t = 0.71831, df = 146, p-value = 0.4737
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.1029872 0.2185936
## sample estimates:
## cor
## 0.0593427
# High-load incorrect vs BPRS: significant negative (r = -.22, p = .0081).
cor.test(individual_trial_averages_probs[["high_incorrect"]]$V11[indiv_avg_over_time$BPRS_TOT < 70],indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_incorrect"]]$V11[indiv_avg_over_time$BPRS_TOT < 70] and indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70]
## t = -2.6841, df = 146, p-value = 0.008112
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.36541303 -0.05752308
## sample estimates:
## cor
## -0.2168543
# High-load difference vs BPRS: significant positive (r = .23, p = .0059).
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V11[indiv_avg_over_time$BPRS_TOT < 70],indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_load_correct_diff"]]$V11[indiv_avg_over_time$BPRS_TOT < 70] and indiv_avg_over_time$BPRS_TOT[indiv_avg_over_time$BPRS_TOT < 70]
## t = 2.7943, df = 146, p-value = 0.005899
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.06638379 0.37309506
## sample estimates:
## cor
## 0.2253144
# For each trial type, correlate per-TR classification probability with the
# behavioral measures separately within each capacity group (rows = the 3
# groups, columns = TRs 1-14).
behav_classification_corr_list <- list()
for (trial_type in seq_len(6)) {
  group_corrs_omnibus <- data.frame(matrix(nrow = 3, ncol = 14))
  rownames(group_corrs_omnibus) <- names(split_indiv_probs[["all_data"]][[trial_type]])[1:3]
  colnames(group_corrs_omnibus) <- seq.int(1, 14)
  group_corrs_acc <- data.frame(matrix(nrow = 3, ncol = 14))
  rownames(group_corrs_acc) <- names(split_indiv_probs[["all_data"]][[trial_type]])[1:3]
  colnames(group_corrs_acc) <- seq.int(1, 14)
  group_corrs_BPRS <- data.frame(matrix(nrow = 3, ncol = 14))
  rownames(group_corrs_BPRS) <- names(split_indiv_probs[["all_data"]][[trial_type]])[1:3]
  colnames(group_corrs_BPRS) <- seq.int(1, 14)
  for (level in seq_len(3)) {
    # Order the group's subjects by PTID so rows line up with the behavioral
    # table filtered below (which is already PTID-sorted from merge()).
    temp_subj <- split_indiv_probs[["all_data"]][[trial_type]][[level]][order(split_indiv_probs[["all_data"]][[trial_type]][[level]]$PTID), ]
    temp_data <- data_to_plot[data_to_plot$PTID %in% split_indiv_probs[["all_data"]][[trial_type]][[level]]$PTID, ]
    for (TR in seq_len(14)) {
      group_corrs_omnibus[level, TR] <- cor(temp_subj[, TR], temp_data$omnibus_span_no_DFR_MRI, use = "pairwise.complete.obs")
      group_corrs_acc[level, TR] <- cor(temp_subj[, TR], temp_data$XDFR_MRI_ACC_L3, use = "pairwise.complete.obs")
      group_corrs_BPRS[level, TR] <- cor(temp_subj[, TR], temp_data$BPRS_TOT.x, use = "pairwise.complete.obs")
    }
  }
  # Fixed: these three assignments sat inside the `level` loop and were
  # redundantly re-executed on every iteration; running them once after the
  # loop produces the identical result (row names never change).
  group_corrs_acc$level <- factor(rownames(group_corrs_acc))
  group_corrs_BPRS$level <- factor(rownames(group_corrs_acc))
  group_corrs_omnibus$level <- factor(rownames(group_corrs_acc))
  behav_classification_corr_list[["omnibus"]][[names(split_indiv_probs[["all_data"]])[trial_type]]] <- group_corrs_omnibus
  behav_classification_corr_list[["BPRS"]][[names(split_indiv_probs[["all_data"]])[trial_type]]] <- group_corrs_BPRS
  behav_classification_corr_list[["L3_Acc"]][[names(split_indiv_probs[["all_data"]])[trial_type]]] <- group_corrs_acc
}
# Melt each group-wise correlation table to long format and draw one
# correlation-by-TR line per capacity group (high / med / low).
behav_classification_corr_melt <- list()
behav_split_plot_list <- list()
for (measure in seq.int(1,3)){
for (trial_type in seq.int(1,6)){
# Name the nested list entries after the measure and trial type so the
# melted data and plots can be looked up either by position or by name.
behav_classification_corr_melt[[names(behav_classification_corr_list)[measure]]][[names(behav_classification_corr_list[[measure]])[trial_type]]] <- melt(behav_classification_corr_list[[measure]][[trial_type]],id.vars="level")
# melt() yields TR as a factor; convert back to numeric for the x axis.
behav_classification_corr_melt[[measure]][[trial_type]]$variable <- as.numeric(as.character(behav_classification_corr_melt[[measure]][[trial_type]]$variable))
# Fix the legend order: high, then med, then low.
behav_classification_corr_melt[[measure]][[trial_type]]$level <- factor(behav_classification_corr_melt[[measure]][[trial_type]]$level, levels=c("high","med","low"))
behav_split_plot_list[[names(behav_classification_corr_melt)[measure]]][[names(behav_classification_corr_melt[[measure]])[trial_type]]] <-
ggplot(data = behav_classification_corr_melt[[measure]][[trial_type]],aes(x=variable,y=value))+
geom_line(aes(color=level))+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ggtitle(names(behav_classification_corr_list[[measure]])[trial_type])+
xlab("TR")+
ylab("Correlation")+
theme_classic()
}
}
# Group-wise correlation time courses: omnibus span.
(behav_split_plot_list[["omnibus"]][[1]] + behav_split_plot_list[["omnibus"]][[2]]) /
(behav_split_plot_list[["omnibus"]][[3]] + behav_split_plot_list[["omnibus"]][[4]])+
plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["omnibus"]][[5]] + behav_split_plot_list[["omnibus"]][[6]]) +
plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
# Group-wise correlation time courses: high-load DFR accuracy.
(behav_split_plot_list[["L3_Acc"]][[1]] + behav_split_plot_list[["L3_Acc"]][[2]]) /
(behav_split_plot_list[["L3_Acc"]][[3]] + behav_split_plot_list[["L3_Acc"]][[4]])+
plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["L3_Acc"]][[5]] + behav_split_plot_list[["L3_Acc"]][[6]]) +
plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
# Group-wise correlation time courses: BPRS total.
(behav_split_plot_list[["BPRS"]][[1]] + behav_split_plot_list[["BPRS"]][[2]]) /
(behav_split_plot_list[["BPRS"]][[3]] + behav_split_plot_list[["BPRS"]][[4]])+
plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["BPRS"]][[5]] + behav_split_plot_list[["BPRS"]][[6]]) +
plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
If we average over time, the only significant relationships are a negative correlation between high load accuracy and the difference between correct and incorrect trials at high load, and a positive relationship between span and classification at low load incorrect trials.
# Time-averaged face-classification probability per subject and condition:
# collapse the 14 TRs of each template-based time course into a single mean.
# NOTE(review): only low_incorrect uses na.rm = TRUE -- presumably because some
# subjects have no low-load incorrect trials; confirm the other conditions are
# complete, otherwise their rowMeans would propagate NA.
template_avg_over_time <- data.frame(high_correct = rowMeans(averages_from_template[["high_correct"]][,1:14]),
high_incorrect = rowMeans(averages_from_template[["high_incorrect"]][,1:14]),
low_correct = rowMeans(averages_from_template[["low_correct"]][,1:14]),
low_incorrect = rowMeans(averages_from_template[["low_incorrect"]][,1:14],na.rm=TRUE),
high_load_diff_correct = rowMeans(averages_from_template[["high_load_correct_diff"]][,1:14]),
low_load_diff_correct = rowMeans(averages_from_template[["low_load_correct_diff"]][,1:14]))
# FIX: removed `template_avg_over_time[is.na(template_avg_over_time)] <- NA`,
# which replaced NA with NA -- a no-op. If imputation was intended (e.g. 0 or a
# column mean), it still needs to be written explicitly.
# Attach behavioral covariates; rows c(1:9, 11:170) skip subject 10, matching
# the classifier-output rows -- NOTE(review): confirm this row alignment.
template_avg_over_time <- data.frame(template_avg_over_time, omnibus_span = constructs_fMRI$omnibus_span_no_DFR_MRI[c(1:9,11:170)], PTID = constructs_fMRI$PTID[c(1:9,11:170)])
template_avg_over_time <- merge(template_avg_over_time, p200_data[,c("PTID","BPRS_TOT","XDFR_MRI_ACC_L3", "XDFR_MRI_ACC_L1")],by="PTID")
# Scatterplots of each subject's time-averaged face probability (y axis)
# against a behavioral measure (x axis), one plot per condition column.
# Column layout after the merge: 1 = PTID, 2:7 = the six condition means,
# 8:11 = omnibus_span, BPRS_TOT, XDFR_MRI_ACC_L3, XDFR_MRI_ACC_L1.
plot_list <- list()
for (i in seq.int(1,6)){
plot_data <- template_avg_over_time[,c(i+1,8:11)]
colnames(plot_data)[1] <- "prob"
# BUG FIX: probability is mapped to y, so the "Probability" label belongs on
# the y axis (was xlab, which mislabeled the behavioral measure). This now
# matches the per-TR scatterplots below, which use ylab("Probability").
plot_list[["omnibus"]][[i]] <- ggplot(data = plot_data,aes(y=prob,x=omnibus_span))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(colnames(template_avg_over_time)[i+1])+
theme_classic()
plot_list[["DFR_Acc"]][[i]] <- ggplot(data = plot_data,aes(y=prob,x=XDFR_MRI_ACC_L3))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(colnames(template_avg_over_time)[i+1])+
theme_classic()
plot_list[["BPRS"]][[i]] <- ggplot(data = plot_data,aes(y=prob,x=BPRS_TOT))+
geom_point()+
stat_smooth(method="lm")+
ylab("Probability")+
ggtitle(colnames(template_avg_over_time)[i+1])+
theme_classic()
}
# Display: time-averaged face probability vs omnibus span, four main conditions.
# (Warnings about removed rows reflect subjects with NA in that condition.)
(plot_list[["omnibus"]][[1]] + plot_list[["omnibus"]][[2]]) /
(plot_list[["omnibus"]][[3]] + plot_list[["omnibus"]][[4]]) +
plot_annotation(title="Correlation of face probability and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 137 rows containing non-finite values (stat_smooth).
## Warning: Removed 137 rows containing missing values (geom_point).
# Display: the two correct-minus-incorrect difference scores vs omnibus span.
(plot_list[["omnibus"]][[5]] + plot_list[["omnibus"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 137 rows containing non-finite values (stat_smooth).
## Warning: Removed 137 rows containing missing values (geom_point).
# Same two figures for DFR high-load accuracy.
(plot_list[["DFR_Acc"]][[1]] + plot_list[["DFR_Acc"]][[2]]) /
(plot_list[["DFR_Acc"]][[3]] + plot_list[["DFR_Acc"]][[4]]) +
plot_annotation(title="Correlation of face probability and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 137 rows containing non-finite values (stat_smooth).
## Warning: Removed 137 rows containing missing values (geom_point).
(plot_list[["DFR_Acc"]][[5]] + plot_list[["DFR_Acc"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 137 rows containing non-finite values (stat_smooth).
## Warning: Removed 137 rows containing missing values (geom_point).
# Same two figures for BPRS total.
(plot_list[["BPRS"]][[1]] + plot_list[["BPRS"]][[2]]) /
(plot_list[["BPRS"]][[3]] + plot_list[["BPRS"]][[4]]) +
plot_annotation(title="Correlation of face probability and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 137 rows containing non-finite values (stat_smooth).
## Warning: Removed 137 rows containing missing values (geom_point).
(plot_list[["BPRS"]][[5]] + plot_list[["BPRS"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 20 rows containing non-finite values (stat_smooth).
## Warning: Removed 20 rows containing missing values (geom_point).
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 137 rows containing non-finite values (stat_smooth).
## Warning: Removed 137 rows containing missing values (geom_point).
# Formal tests of the averaged-probability relationships shown above. Per the
# output: only low-load incorrect vs span (p = .048) and the high-load
# correct-minus-incorrect difference vs high-load accuracy (p = .010) reach
# significance.
cor.test(template_avg_over_time$high_incorrect, template_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: template_avg_over_time$high_incorrect and template_avg_over_time$omnibus_span
## t = 1.4199, df = 147, p-value = 0.1578
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.04533175 0.27202812
## sample estimates:
## cor
## 0.1163161
cor.test(template_avg_over_time$low_incorrect, template_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: template_avg_over_time$low_incorrect and template_avg_over_time$omnibus_span
## t = 2.0599, df = 30, p-value = 0.04817
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.003789419 0.624105381
## sample estimates:
## cor
## 0.3520183
cor.test(template_avg_over_time$high_load_diff_correct, template_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: template_avg_over_time$high_load_diff_correct and template_avg_over_time$omnibus_span
## t = -1.0643, df = 147, p-value = 0.289
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.24480014 0.07440388
## sample estimates:
## cor
## -0.08744225
cor.test(template_avg_over_time$low_load_diff_correct, template_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: template_avg_over_time$low_load_diff_correct and template_avg_over_time$omnibus_span
## t = -1.5789, df = 30, p-value = 0.1249
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.57057136 0.07937543
## sample estimates:
## cor
## -0.2769849
cor.test(template_avg_over_time$high_load_diff_correct, template_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: template_avg_over_time$high_load_diff_correct and template_avg_over_time$XDFR_MRI_ACC_L3
## t = -2.6003, df = 147, p-value = 0.01027
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.35841343 -0.05060548
## sample estimates:
## cor
## -0.209699
# Assemble the behavioral covariates, then correlate each behavioral measure
# (columns 2:5 of data_to_plot) with the classification probability at every
# TR, separately for the six conditions, and plot one correlation time course
# per condition.
data_to_plot <- merge(constructs_fMRI, p200_data, by = "PTID")
data_to_plot <- merge(data_to_plot, p200_clinical_zscores, by = "PTID")
data_to_plot <- data_to_plot[c(1:9, 11:170), c(1, 7, 14, 15, 41, 42)]
# Load effect on accuracy: high-load minus low-load.
data_to_plot$ACC_LE <- data_to_plot$XDFR_MRI_ACC_L3 - data_to_plot$XDFR_MRI_ACC_L1
corr_to_behav_plots <- list()
for (cond_idx in seq_len(6)) {
  cond_name <- names(averages_from_template)[cond_idx]
  # 4 measures x 14 TRs grid of correlations.
  # NOTE(review): cor() pairs rows positionally; assumes data_to_plot rows are
  # aligned with the rows of averages_from_template -- confirm.
  corr_grid <- data.frame(matrix(nrow = 4, ncol = 14))
  for (col_idx in seq.int(2, 5)) {
    for (tr_idx in seq_len(14)) {
      corr_grid[col_idx - 1, tr_idx] <- cor(data_to_plot[, col_idx],
                                            averages_from_template[[cond_idx]][, tr_idx],
                                            use = "pairwise.complete.obs")
    }
  }
  # Long format: one row per TR, one column per behavioral measure.
  corr_grid <- data.frame(t(corr_grid))
  corr_grid$TR <- seq_len(14)
  colnames(corr_grid)[1:4] <- colnames(data_to_plot)[2:5]
  corr_grid_long <- melt(corr_grid, id.vars = "TR")
  corr_to_behav_plots[[cond_name]] <- ggplot(data = corr_grid_long, aes(x = TR, y = value)) +
    geom_line(aes(color = variable)) +
    scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
    ggtitle(cond_name) +
    theme_classic()
}
# Display: behavioral-correlation time courses, four main conditions then the
# two difference scores, with a shared legend.
(corr_to_behav_plots[[1]] + corr_to_behav_plots[[2]]) / (corr_to_behav_plots[[3]] + corr_to_behav_plots[[4]])+
plot_layout(guides="collect")+
plot_annotation(title = "Correlation between face classification and behavioral measures")
(corr_to_behav_plots[[5]] + corr_to_behav_plots[[6]])+
plot_layout(guides="collect")+
plot_annotation(title = "Correlation between face classification and behavioral measures")
# Per-period scatterplots: face-classification probability at a single TR
# against each behavioral measure, for every condition. The nine near-identical
# ggplot constructions are collapsed into one helper; plot_list keeps the exact
# [[measure]][[period]][[condition]] structure and creation order as before.
# NOTE(review): V6/V8/V11 as the cue/delay/probe TRs is taken from how the
# surrounding code uses them -- confirm against the task timing.
make_behav_scatter <- function(df, y_var, x_var, plot_title) {
  # One scatterplot with a linear fit. xlab(x_var) reproduces the default
  # axis label the previous inlined aes(x = <column>) calls produced.
  ggplot(data = df, aes(y = .data[[y_var]], x = .data[[x_var]])) +
    geom_point() +
    stat_smooth(method = "lm") +
    ylab("Probability") +
    xlab(x_var) +
    ggtitle(plot_title) +
    theme_classic()
}
period_cols <- c(cue = "V6", delay = "V8", probe = "V11")
measure_cols <- c(omnibus = "omnibus_span_no_DFR_MRI",
                  L3_Acc = "XDFR_MRI_ACC_L3",
                  BPRS = "BPRS_TOT")
plot_list <- list()
for (trial_type in seq.int(1, 6)) {
  temp_plot_data <- merge(p200_data, averages_from_template[[trial_type]], by = "PTID")
  temp_plot_data <- merge(temp_plot_data, constructs_fMRI, by = "PTID")
  for (measure_name in names(measure_cols)) {
    for (period_name in names(period_cols)) {
      plot_list[[measure_name]][[period_name]][[trial_type]] <- make_behav_scatter(
        temp_plot_data,
        period_cols[[period_name]],
        measure_cols[[measure_name]],
        names(averages_from_template)[trial_type]
      )
    }
  }
}
There are no significant relationships at the cue period.
# Display: cue-period (V6) classification vs each behavioral measure.
(plot_list[["omnibus"]][["cue"]][[1]] + plot_list[["omnibus"]][["cue"]][[2]]) /
(plot_list[["omnibus"]][["cue"]][[3]] + plot_list[["omnibus"]][["cue"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["cue"]][[5]] + plot_list[["omnibus"]][["cue"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["cue"]][[1]] + plot_list[["L3_Acc"]][["cue"]][[2]]) /
(plot_list[["L3_Acc"]][["cue"]][[3]] + plot_list[["L3_Acc"]][["cue"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["cue"]][[5]] + plot_list[["L3_Acc"]][["cue"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["cue"]][[1]] + plot_list[["BPRS"]][["cue"]][[2]]) /
(plot_list[["BPRS"]][["cue"]][[3]] + plot_list[["BPRS"]][["cue"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["cue"]][[5]] + plot_list[["BPRS"]][["cue"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
# Cue-period (V6) follow-up tests against the behavioral measures. All p > .08.
# NOTE(review): cor.test pairs rows positionally; assumes averages_from_template
# rows align with indiv_avg_over_time rows -- confirm.
cor.test(averages_from_template[["high_incorrect"]]$V6,indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_incorrect"]]$V6 and indiv_avg_over_time$omnibus_span
## t = 1.7549, df = 147, p-value = 0.08135
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.01796186 0.29720569
## sample estimates:
## cor
## 0.1432518
cor.test(averages_from_template[["low_incorrect"]]$V6,indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["low_incorrect"]]$V6 and indiv_avg_over_time$omnibus_span
## t = -1.0019, df = 30, p-value = 0.3244
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.4974162 0.1800606
## sample estimates:
## cor
## -0.1799305
cor.test(averages_from_template[["high_load_correct_diff"]]$V6,indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_load_correct_diff"]]$V6 and indiv_avg_over_time$omnibus_span
## t = -1.003, df = 147, p-value = 0.3175
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.24005799 0.07941233
## sample estimates:
## cor
## -0.08244035
cor.test(averages_from_template[["high_incorrect"]]$V6,indiv_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_incorrect"]]$V6 and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = 1.4824, df = 147, p-value = 0.1404
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.04022022 0.27676344
## sample estimates:
## cor
## 0.1213646
cor.test(averages_from_template[["low_correct"]]$V6,indiv_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["low_correct"]]$V6 and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = 1.731, df = 147, p-value = 0.08555
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.01991356 0.29542472
## sample estimates:
## cor
## 0.141339
There are no significant relationships at the delay period.
# Display: delay-period (V8) classification vs each behavioral measure.
(plot_list[["omnibus"]][["delay"]][[1]] + plot_list[["omnibus"]][["delay"]][[2]]) /
(plot_list[["omnibus"]][["delay"]][[3]] + plot_list[["omnibus"]][["delay"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["delay"]][[5]] + plot_list[["omnibus"]][["delay"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["delay"]][[1]] + plot_list[["L3_Acc"]][["delay"]][[2]]) /
(plot_list[["L3_Acc"]][["delay"]][[3]] + plot_list[["L3_Acc"]][["delay"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["delay"]][[5]] + plot_list[["L3_Acc"]][["delay"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["delay"]][[1]] + plot_list[["BPRS"]][["delay"]][[2]]) /
(plot_list[["BPRS"]][["delay"]][[3]] + plot_list[["BPRS"]][["delay"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["delay"]][[5]] + plot_list[["BPRS"]][["delay"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
# Delay-period (V8) follow-up tests. Both nonsignificant (p = .29 and p = .08).
# NOTE(review): assumes row alignment between the two data objects -- confirm.
cor.test(averages_from_template[["high_load_correct_diff"]]$V8,indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_load_correct_diff"]]$V8 and indiv_avg_over_time$omnibus_span
## t = 1.0678, df = 147, p-value = 0.2873
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.07411185 0.24507617
## sample estimates:
## cor
## 0.08773364
cor.test(averages_from_template[["high_correct"]]$V8,indiv_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_correct"]]$V8 and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = -1.75, df = 147, p-value = 0.0822
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.29684005 0.01836276
## sample estimates:
## cor
## -0.142859
There are no significant relationships at probe.
# Display: probe-period (V11) classification vs each behavioral measure.
(plot_list[["omnibus"]][["probe"]][[1]] + plot_list[["omnibus"]][["probe"]][[2]]) /
(plot_list[["omnibus"]][["probe"]][[3]] + plot_list[["omnibus"]][["probe"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["probe"]][[5]] + plot_list[["omnibus"]][["probe"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["probe"]][[1]] + plot_list[["L3_Acc"]][["probe"]][[2]]) /
(plot_list[["L3_Acc"]][["probe"]][[3]] + plot_list[["L3_Acc"]][["probe"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["probe"]][[5]] + plot_list[["L3_Acc"]][["probe"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["probe"]][[1]] + plot_list[["BPRS"]][["probe"]][[2]]) /
(plot_list[["BPRS"]][["probe"]][[3]] + plot_list[["BPRS"]][["probe"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["probe"]][[5]] + plot_list[["BPRS"]][["probe"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 117 rows containing non-finite values (stat_smooth).
## Warning: Removed 117 rows containing missing values (geom_point).
# Probe-period (V11) follow-up tests. Both nonsignificant (p = .08 and p = .23).
# NOTE(review): assumes row alignment between the two data objects -- confirm.
cor.test(averages_from_template[["high_correct"]]$V11,indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_correct"]]$V11 and indiv_avg_over_time$omnibus_span
## t = -1.7808, df = 147, p-value = 0.07701
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.29913111 0.01584917
## sample estimates:
## cor
## -0.1453211
cor.test(averages_from_template[["high_load_correct_diff"]]$V11,indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_load_correct_diff"]]$V11 and indiv_avg_over_time$omnibus_span
## t = -1.2164, df = 147, p-value = 0.2258
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.25650641 0.06197088
## sample estimates:
## cor
## -0.09982381
# For each condition, correlate each TR's classification probability with the
# behavioral measures separately within the three capacity groups.
behav_classification_corr_list <- list()
for (trial_type in seq.int(1,6)){
# Group labels (high/med/low) for this condition, used as row names of each
# 3 x 14 correlation grid; one shared empty grid is copied for all measures.
group_names <- names(split_template[["all_data"]][[trial_type]])[1:3]
corr_grid_template <- data.frame(matrix(nrow=3,ncol=14))
rownames(corr_grid_template) <- group_names
colnames(corr_grid_template) <- seq.int(1,14)
group_corrs_omnibus <- corr_grid_template
group_corrs_acc <- corr_grid_template
group_corrs_BPRS <- corr_grid_template
for (level in seq.int(1,3)){
# Sort this group's subjects by PTID so rows line up with data_to_plot,
# which merge() left PTID-sorted -- NOTE(review): confirm this alignment.
temp_subj <- split_template[["all_data"]][[trial_type]][[level]][order(split_template[["all_data"]][[trial_type]][[level]]$PTID),]
temp_data <- data_to_plot[data_to_plot$PTID %in% split_template[["all_data"]][[trial_type]][[level]]$PTID,]
for (TR in seq.int(1,14)){
group_corrs_omnibus[level,TR] <- cor(temp_subj[,TR],temp_data$omnibus_span_no_DFR_MRI,use="pairwise.complete.obs")
group_corrs_acc[level,TR] <- cor(temp_subj[,TR],temp_data$XDFR_MRI_ACC_L3,use="pairwise.complete.obs")
group_corrs_BPRS[level,TR] <- cor(temp_subj[,TR],temp_data$BPRS_TOT.x,use="pairwise.complete.obs")
}
}
# FIX: these three assignments previously ran inside the level loop (three
# redundant times) and all borrowed rownames(group_corrs_acc). They now run
# once, from the shared group names; the final result is identical.
group_corrs_omnibus$level <- factor(group_names)
group_corrs_acc$level <- factor(group_names)
group_corrs_BPRS$level <- factor(group_names)
behav_classification_corr_list[["omnibus"]][[names(split_template[["all_data"]])[trial_type]]] <- group_corrs_omnibus
behav_classification_corr_list[["BPRS"]][[names(split_template[["all_data"]])[trial_type]]] <- group_corrs_BPRS
behav_classification_corr_list[["L3_Acc"]][[names(split_template[["all_data"]])[trial_type]]] <- group_corrs_acc
}
# Reshape each measure x condition correlation grid to long format and build
# the per-condition, group-colored correlation time-course plots.
# Note the mixed indexing: names are used when creating list entries, numeric
# positions when reading back (valid because creation order fixes positions).
behav_classification_corr_melt <- list()
behav_split_plot_list <- list()
for (measure in seq.int(1,3)){
for (trial_type in seq.int(1,6)){
# melt() yields one row per (level, TR); the TR column comes out as a factor
# named "variable", so convert it back to numeric for a continuous x axis.
behav_classification_corr_melt[[names(behav_classification_corr_list)[measure]]][[names(behav_classification_corr_list[[measure]])[trial_type]]] <- melt(behav_classification_corr_list[[measure]][[trial_type]],id.vars="level")
behav_classification_corr_melt[[measure]][[trial_type]]$variable <- as.numeric(as.character(behav_classification_corr_melt[[measure]][[trial_type]]$variable))
# Fix the legend/display order of the capacity groups.
behav_classification_corr_melt[[measure]][[trial_type]]$level <- factor(behav_classification_corr_melt[[measure]][[trial_type]]$level, levels=c("high","med","low"))
behav_split_plot_list[[names(behav_classification_corr_melt)[measure]]][[names(behav_classification_corr_melt[[measure]])[trial_type]]] <-
ggplot(data = behav_classification_corr_melt[[measure]][[trial_type]],aes(x=variable,y=value))+
geom_line(aes(color=level))+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ggtitle(names(behav_classification_corr_list[[measure]])[trial_type])+
xlab("TR")+
ylab("Correlation")+
theme_classic()
}
}
# Final composite figures of the group-split correlation time courses.
(behav_split_plot_list[["omnibus"]][[1]] + behav_split_plot_list[["omnibus"]][[2]]) /
(behav_split_plot_list[["omnibus"]][[3]] + behav_split_plot_list[["omnibus"]][[4]])+
plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["omnibus"]][[5]] + behav_split_plot_list[["omnibus"]][[6]]) +
plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["L3_Acc"]][[1]] + behav_split_plot_list[["L3_Acc"]][[2]]) /
(behav_split_plot_list[["L3_Acc"]][[3]] + behav_split_plot_list[["L3_Acc"]][[4]])+
plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["L3_Acc"]][[5]] + behav_split_plot_list[["L3_Acc"]][[6]]) +
plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["BPRS"]][[1]] + behav_split_plot_list[["BPRS"]][[2]]) /
(behav_split_plot_list[["BPRS"]][[3]] + behav_split_plot_list[["BPRS"]][[4]])+
plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["BPRS"]][[5]] + behav_split_plot_list[["BPRS"]][[6]]) +
# FIX: title previously read "BPRS Total with ..." -- missing "Correlation",
# inconsistent with the three sibling figures above.
plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")